[HVM] Save/restore cleanups: don't save state for downed vcpus.
authorTim Deegan <Tim.Deegan@xensource.com>
Mon, 29 Jan 2007 16:49:29 +0000 (16:49 +0000)
committerTim Deegan <Tim.Deegan@xensource.com>
Mon, 29 Jan 2007 16:49:29 +0000 (16:49 +0000)
(Since we wouldn't load it anyway)
Also tidy up the plumbing around the hypercalls.

Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
xen/arch/x86/domctl.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/intercept.c
xen/include/asm-x86/hvm/support.h

index 9646b96507462baad582bcb62d0504eb2f292c75..fbf55fed5b4a2bf74f21fb6cf767e88fd8225d75 100644 (file)
@@ -290,7 +290,6 @@ long arch_do_domctl(
     { 
         struct hvm_domain_context *c;
         struct domain             *d;
-        struct vcpu               *v;
 
         ret = -ESRCH;
         if ( (d = get_domain_by_id(domctl->domain)) == NULL )
@@ -299,15 +298,16 @@ long arch_do_domctl(
         ret = -ENOMEM;
         if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
             goto sethvmcontext_out;
-
-        v = d->vcpu[0];
         
         ret = -EFAULT;
-
         if ( copy_from_guest(c, domctl->u.hvmcontext.ctxt, 1) != 0 )
             goto sethvmcontext_out;
 
-        ret = arch_sethvm_ctxt(v, c);
+        ret = -EINVAL;
+        if ( !is_hvm_domain(d) ) 
+            goto sethvmcontext_out;
+
+        ret = hvm_load(d, c);
 
         xfree(c);
 
@@ -321,7 +321,6 @@ long arch_do_domctl(
     { 
         struct hvm_domain_context *c;
         struct domain             *d;
-        struct vcpu               *v;
 
         ret = -ESRCH;
         if ( (d = get_domain_by_id(domctl->domain)) == NULL )
@@ -330,15 +329,14 @@ long arch_do_domctl(
         ret = -ENOMEM;
         if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
             goto gethvmcontext_out;
-
-        v = d->vcpu[0];
-
+        memset(c, 0, sizeof(*c));
+        
         ret = -ENODATA;
-        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+        if ( !is_hvm_domain(d) ) 
             goto gethvmcontext_out;
         
         ret = 0;
-        if (arch_gethvm_ctxt(v, c) == -1)
+        if (hvm_save(d, c) != 0)
             ret = -EFAULT;
 
         if ( copy_to_guest(domctl->u.hvmcontext.ctxt, c, 1) )
index d863ac769d1902c1f0f84230b93f0e8e862d7afb..f0f9caec37b2289c705447185a53de83b4e6b65e 100644 (file)
@@ -189,17 +189,15 @@ void hvm_domain_destroy(struct domain *d)
         unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
 }
 
-#define HVM_VCPU_CTXT_MAGIC 0x85963130
 void hvm_save_cpu_ctxt(hvm_domain_context_t *h, void *opaque)
 {
     struct vcpu *v = opaque;
 
-    if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) {
-        hvm_put_32u(h, 0x0);
+    /* We don't need to save state for a vcpu that is down; the restore 
+     * code will leave it down if there is nothing saved. */
+    if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) 
         return;
-    }
 
-    hvm_put_32u(h, HVM_VCPU_CTXT_MAGIC);
     hvm_funcs.save_cpu_ctxt(h, opaque);
 }
 
@@ -207,13 +205,10 @@ int hvm_load_cpu_ctxt(hvm_domain_context_t *h, void *opaque, int version)
 {
     struct vcpu *v = opaque;
 
-    if ( hvm_get_32u(h) != HVM_VCPU_CTXT_MAGIC )
-        return 0;
-
     if ( hvm_funcs.load_cpu_ctxt(h, opaque, version) < 0 )
         return -EINVAL;
 
-    /* Auxiliary processors shoudl be woken immediately. */
+    /* Auxiliary processors should be woken immediately. */
     if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
         vcpu_wake(v);
 
index e5c1728b7dc6954a2ab0322a86f44ff51023bb5b..cc77241da91b330542b3d3b4e94ab0d694994437 100644 (file)
@@ -190,7 +190,7 @@ int hvm_register_savevm(struct domain *d,
     return 0;
 }
 
-int hvm_save(struct vcpu *v, hvm_domain_context_t *h)
+int hvm_save(struct domain *d, hvm_domain_context_t *h)
 {
     uint32_t len, len_pos, cur_pos;
     uint32_t eax, ebx, ecx, edx;
@@ -198,13 +198,6 @@ int hvm_save(struct vcpu *v, hvm_domain_context_t *h)
     char *chgset;
     struct hvm_save_header hdr;
 
-    if (!is_hvm_vcpu(v)) {
-        printk("hvm_save only for hvm guest!\n");
-        return -1;
-    }
-
-    memset(h, 0, sizeof(hvm_domain_context_t));
-
     hdr.magic = HVM_FILE_MAGIC;
     hdr.version = HVM_FILE_VERSION;
     cpuid(1, &eax, &ebx, &ecx, &edx);
@@ -222,7 +215,7 @@ int hvm_save(struct vcpu *v, hvm_domain_context_t *h)
     hvm_put_8u(h, len);
     hvm_put_buffer(h, chgset, len);
 
-    for(se = v->domain->arch.hvm_domain.first_se; se != NULL; se = se->next) {
+    for(se = d->arch.hvm_domain.first_se; se != NULL; se = se->next) {
         /* ID string */
         len = strnlen(se->idstr, HVM_SE_IDSTR_LEN);
         hvm_put_8u(h, len);
@@ -270,7 +263,7 @@ static HVMStateEntry *find_se(struct domain *d, const char *idstr, int instance_
     return NULL;
 }
 
-int hvm_load(struct vcpu *v, hvm_domain_context_t *h)
+int hvm_load(struct domain *d, hvm_domain_context_t *h)
 {
     uint32_t len, rec_len, rec_pos, instance_id, version_id;
     uint32_t eax, ebx, ecx, edx;
@@ -280,11 +273,7 @@ int hvm_load(struct vcpu *v, hvm_domain_context_t *h)
     char *cur_chgset;
     int ret;
     struct hvm_save_header hdr;
-
-    if (!is_hvm_vcpu(v)) {
-        printk("hvm_load only for hvm guest!\n");
-        return -1;
-    }
+    struct vcpu *v;
 
     if (h->size >= HVM_CTXT_SIZE) {
         printk("hvm_load fail! seems hvm_domain_context overflow when hvm_save! need %"PRId32" bytes.\n", h->size);
@@ -339,6 +328,11 @@ int hvm_load(struct vcpu *v, hvm_domain_context_t *h)
         printk("warnings: try to restore hvm guest when changeset is unavailable.\n");
 
 
+    /* Down all the vcpus: we only re-enable the ones that had state saved. */
+    for_each_vcpu(d, v) 
+        if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
+            vcpu_sleep_nosync(v);
+
     while(1) {
         if (hvm_ctxt_end(h)) {
             break;
@@ -362,7 +356,7 @@ int hvm_load(struct vcpu *v, hvm_domain_context_t *h)
         rec_len = hvm_get_32u(h);
         rec_pos = hvm_ctxt_tell(h);
 
-        se = find_se(v->domain, idstr, instance_id);
+        se = find_se(d, idstr, instance_id);
         if (se == NULL) {
             printk("warnings: hvm load can't find device %s's instance %d!\n",
                     idstr, instance_id);
@@ -384,21 +378,6 @@ int hvm_load(struct vcpu *v, hvm_domain_context_t *h)
     return 0;
 }
 
-int arch_gethvm_ctxt(
-    struct vcpu *v, struct hvm_domain_context *c)
-{
-    if ( !is_hvm_vcpu(v) )
-        return -1;
-
-    return hvm_save(v, c);
-
-}
-
-int arch_sethvm_ctxt(
-        struct vcpu *v, struct hvm_domain_context *c)
-{
-    return hvm_load(v, c);
-}
 
 #ifdef HVM_DEBUG_SUSPEND
 static void shpage_info(shared_iopage_t *sh)
index 2d15019bb7a48372ad2350b11669ca9076880eaa..f474756d97ec5772bc7fb722074e2a224d86437b 100644 (file)
@@ -242,11 +242,8 @@ static inline void hvm_get_buffer(hvm_domain_context_t *h, char *buf, int len)
 #define hvm_get_struct(_h, _p) \
     hvm_get_buffer((_h), (char *)(_p), sizeof(*(_p)))
 
-int hvm_save(struct vcpu*, hvm_domain_context_t *h);
-int hvm_load(struct vcpu*, hvm_domain_context_t *h);
-
-int arch_sethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
-int arch_gethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
+int hvm_save(struct domain *d, hvm_domain_context_t *h);
+int hvm_load(struct domain *d, hvm_domain_context_t *h);
 
 void shpage_init(struct domain *d, shared_iopage_t *sp);